From c856a00e75a918b1eb6530adb23011bfb4f8f5fc Mon Sep 17 00:00:00 2001
From: Debian Qt/KDE Maintainers
Date: Wed, 31 Dec 2025 13:28:57 +0300
Subject: [PATCH] QReadWriteLock: fix data race on weakly-ordered memory
 architectures

Origin: upstream, https://code.qt.io/cgit/qt/qtbase.git/commit?id=4fd88011fa7975ce
Last-Update: 2025-12-14

The fix changes the relaxed load of d_ptr in lockFor{Read,Write} after
the acquire of the mutex to an acquire load, to establish
synchronization with the release store of d_ptr when converting from an
uncontended lock to a contended lock.

Gbp-Pq: Name qreadwritelock_data_race_2.diff
---
 src/corelib/thread/qreadwritelock.cpp         |   8 +-
 .../qreadwritelock/tst_qreadwritelock.cpp     | 106 ++++++++++++++++++
 2 files changed, 110 insertions(+), 4 deletions(-)

diff --git a/src/corelib/thread/qreadwritelock.cpp b/src/corelib/thread/qreadwritelock.cpp
index 84d73444b..1e286d581 100644
--- a/src/corelib/thread/qreadwritelock.cpp
+++ b/src/corelib/thread/qreadwritelock.cpp
@@ -267,14 +267,14 @@ bool QReadWriteLock::tryLockForRead(int timeout)
             return d->recursiveLockForRead(timeout);
 
         auto lock = qt_unique_lock(d->mutex);
-        if (d != d_ptr.loadRelaxed()) {
+        if (QReadWriteLockPrivate *dd = d_ptr.loadAcquire(); d != dd) {
             // d_ptr has changed: this QReadWriteLock was unlocked before we had
             // time to lock d->mutex.
             // We are holding a lock to a mutex within a QReadWriteLockPrivate
             // that is already released (or even is already re-used). That's ok
             // because the QFreeList never frees them.
             // Just unlock d->mutex (at the end of the scope) and retry.
-            d = d_ptr.loadAcquire();
+            d = dd;
             continue;
         }
         return d->lockForRead(timeout);
@@ -377,11 +377,11 @@ bool QReadWriteLock::tryLockForWrite(int timeout)
             return d->recursiveLockForWrite(timeout);
 
         auto lock = qt_unique_lock(d->mutex);
-        if (d != d_ptr.loadRelaxed()) {
+        if (QReadWriteLockPrivate *dd = d_ptr.loadAcquire(); d != dd) {
             // The mutex was unlocked before we had time to lock the mutex.
             // We are holding to a mutex within a QReadWriteLockPrivate that is already released
             // (or even is already re-used) but that's ok because the QFreeList never frees them.
-            d = d_ptr.loadAcquire();
+            d = dd;
             continue;
         }
         return d->lockForWrite(timeout);
diff --git a/tests/auto/corelib/thread/qreadwritelock/tst_qreadwritelock.cpp b/tests/auto/corelib/thread/qreadwritelock/tst_qreadwritelock.cpp
index 47bae585a..56f4a0865 100644
--- a/tests/auto/corelib/thread/qreadwritelock/tst_qreadwritelock.cpp
+++ b/tests/auto/corelib/thread/qreadwritelock/tst_qreadwritelock.cpp
@@ -85,6 +85,7 @@ private slots:
     void multipleReadersLoop();
     void multipleWritersLoop();
     void multipleReadersWritersLoop();
+    void heavyLoadLocks();
     void countingTest();
     void limitedReaders();
     void deleteOnUnlock();
@@ -635,6 +636,111 @@ public:
     }
 };
 
+class HeavyLoadLockThread : public QThread
+{
+public:
+    QReadWriteLock &testRwlock;
+    const qsizetype iterations;
+    const int numThreads;
+    inline HeavyLoadLockThread(QReadWriteLock &l, qsizetype iters, int numThreads, QVector<QAtomicInt *> &counters):
+        testRwlock(l),
+        iterations(iters),
+        numThreads(numThreads),
+        counters(counters)
+    { }
+
+private:
+    QVector<QAtomicInt *> &counters;
+    QAtomicInt *getCounter(qsizetype index)
+    {
+        QReadLocker locker(&testRwlock);
+        /*
+            The index increases monotonically, so the requested index
+            is always within or at the end of the counters vector.
+        */
+        Q_ASSERT(index <= counters.size());
+        if (counters.size() <= index || counters[index] == nullptr) {
+            locker.unlock();
+            QWriteLocker wlocker(&testRwlock);
+            if (counters.size() <= index)
+                counters.resize(index + 1, nullptr);
+            if (counters[index] == nullptr)
+                counters[index] = new QAtomicInt(0);
+            return counters[index];
+        }
+        return counters[index];
+    }
+    void releaseCounter(qsizetype index)
+    {
+        QWriteLocker locker(&testRwlock);
+        delete counters[index];
+        counters[index] = nullptr;
+    }
+
+public:
+    void run() override
+    {
+        for (qsizetype i = 0; i < iterations; ++i) {
+            QAtomicInt *counter = getCounter(i);
+            /*
+                Each counter is accessed by each thread and
+                increased exactly once. As a result, when the
+                counter reaches numThreads, i.e. the value
+                fetched before the increment is numThreads-1,
+                we know all threads have accessed this counter
+                and we can delete it safely.
+            */
+            int prev = counter->fetchAndAddRelaxed(1);
+            if (prev == numThreads - 1) {
+#ifdef QT_BUILDING_UNDER_TSAN
+                /*
+                    Under TSAN, deleting and freeing an object
+                    triggers a write operation on the memory of
+                    the object. Since we used fetchAndAddRelaxed
+                    to update the counter, TSAN would report a
+                    data race when deleting the counter here. To
+                    avoid the false positive, we simply reset the
+                    counter to 0 with ordered semantics, which
+                    ensures that the freeing operation happens
+                    after all fetchAndAddRelaxed operations in
+                    other threads.
+
+                    When not building under TSAN, deleting the
+                    counter does not read or write the memory
+                    region of the counter, so no data race can
+                    happen.
+                */
+                counter->fetchAndStoreOrdered(0);
+#endif
+                releaseCounter(i);
+            }
+        }
+    }
+};
+
+/*
+    Multiple threads race to acquire and release locks
+    on the same indices.
+*/
+void tst_QReadWriteLock::heavyLoadLocks()
+{
+    constexpr qsizetype iterations = 65536 * 4;
+    constexpr int numThreads = 8;
+    QVector<QAtomicInt *> counters;
+    QReadWriteLock testLock;
+    std::array<std::unique_ptr<HeavyLoadLockThread>, numThreads> threads;
+    for (auto &thread : threads)
+        thread = std::make_unique<HeavyLoadLockThread>(testLock, iterations, numThreads, counters);
+    for (auto &thread : threads)
+        thread->start();
+    for (auto &thread : threads)
+        thread->wait();
+    QVERIFY(counters.size() == iterations);
+    for (qsizetype i = 0; i < iterations; ++i)
+        QVERIFY(counters[i] == nullptr);
+}
+
 /*
     A writer acquires a read-lock, a reader locks
-- 
2.30.2
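The acquire/release pairing described in the commit message can be seen
in isolation in the following standalone sketch. It uses plain
std::atomic and std::mutex rather than Qt's internal types, and the
names Private, publisher and consumer are illustrative only, not taken
from the patch:

    // Minimal sketch (assumed analogue, not Qt code): a pointer published
    // with a release store must be read back with an acquire load, even
    // after an unrelated mutex has been taken, or writes made before the
    // publication may not be visible on weakly-ordered architectures.
    #include <atomic>
    #include <cassert>
    #include <mutex>
    #include <thread>

    struct Private {
        int payload = 0; // stands in for the state behind QReadWriteLockPrivate
    };

    std::atomic<Private *> d_ptr{nullptr}; // analogue of QReadWriteLock::d_ptr
    std::mutex mtx;                        // analogue of d->mutex

    void publisher(Private *p)
    {
        p->payload = 42;                           // write before publication
        d_ptr.store(p, std::memory_order_release); // uncontended -> contended
    }

    void consumer()
    {
        std::unique_lock lock(mtx); // taking the mutex alone is not enough
        // The acquire load pairs with the release store above. A relaxed
        // load could observe the new pointer while the payload write is
        // still invisible, which is the data race this patch fixes.
        if (Private *d = d_ptr.load(std::memory_order_acquire))
            assert(d->payload == 42);
    }

    int main()
    {
        Private p;
        std::thread t1(publisher, &p);
        std::thread t2(consumer);
        t1.join();
        t2.join();
        return 0;
    }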